Add support for pausing domains (and repinning them to a different CPU) synchronously.
sys.exit(-1)
cpu = int(sys.argv[3])
- orig_state = xc.domain_getinfo(first_dom=dom, max_doms=1)[0]['stopped']
-
- while xc.domain_getinfo(first_dom=dom, max_doms=1)[0]['stopped'] != 1:
- xc.domain_stop( dom=dom )
- time.sleep(0.1)
-
+
rc = xc.domain_pincpu( dom, cpu )
- if orig_state == 0:
- xc.domain_start( dom=dom )
-
elif cmd == 'list':
print 'Dom Name Mem(kb) CPU State Time(s)'
for domain in xc.domain_getinfo():
case DOM0_PINCPUDOMAIN:
{
- struct task_struct * p = find_domain_by_id(op->u.pincpudomain.domain);
- int cpu = op->u.pincpudomain.cpu;
- ret = -EINVAL;
- if ( p != NULL )
+ domid_t dom = op->u.pincpudomain.domain;
+
+ if ( dom == current->domain || dom == IDLE_DOMAIN_ID )
+ ret = -EINVAL;
+ else
{
- if ( cpu == -1 )
- {
- p->cpupinned = 0;
- ret = 0;
- }
- else
+ struct task_struct * p = find_domain_by_id(dom);
+ int cpu = op->u.pincpudomain.cpu;
+ int we_paused = 0;
+
+ ret = -ESRCH;
+
+ if ( p != NULL )
{
- /* For the moment, we are unable to move running
- domains between CPUs. (We need a way of synchronously
- stopping running domains). For now, if we discover the
- domain is not stopped already then cowardly bail out
- with ENOSYS */
-
- if( !(p->state & TASK_STOPPED) )
+ if ( cpu == -1 )
{
- ret = -ENOSYS;
- }
+ p->cpupinned = 0;
+ ret = 0;
+ }
else
{
- /* We need a task structure lock here!!!
- FIX ME!! */
- cpu = cpu % smp_num_cpus;
- p->processor = cpu;
- p->cpupinned = 1;
+ /* Pause domain if necessary. */
+ if( !(p->state & TASK_STOPPED) && !(p->state & TASK_PAUSED) )
+ {
+ sched_pause_sync(p);
+ we_paused = 1;
+ }
+
+ /* We need a task structure lock here!!!
+ FIX ME!! */
+ cpu = cpu % smp_num_cpus;
+ p->processor = cpu;
+ p->cpupinned = 1;
+
+ if ( we_paused )
+ wake_up(p);
+
ret = 0;
}
- }
- put_task_struct(p);
- }
+ put_task_struct(p);
+ }
+ }
}
break;
return 0;
}
+/*
+ * bvt_pause: BVT scheduler hook invoked (via SCHED_FN(pause, p)) when a
+ * domain is being synchronously paused. Simply removes the task from the
+ * run queue, if queued, so the scheduler will not pick it again.
+ */
+static void bvt_pause(struct task_struct *p)
+{
+    if( __task_on_runqueue(p) )
+        __del_from_runqueue(p);
+}
struct scheduler sched_bvt_def = {
.name = "Borrowed Virtual Time",
.dump_settings = bvt_dump_settings,
.dump_cpu_state = bvt_dump_cpu_state,
.dump_runq_el = bvt_dump_runq_el,
+ .pause = bvt_pause,
};
printk("rr_slice = %llu ", rr_slice);
}
+/*
+ * rr_pause: round-robin scheduler hook invoked (via SCHED_FN(pause, p))
+ * when a domain is being synchronously paused. Removes the task from the
+ * run queue, if queued, so the scheduler will not pick it again.
+ */
+static void rr_pause(struct task_struct *p)
+{
+    if ( __task_on_runqueue(p) )
+        __del_from_runqueue(p);
+}
+
struct scheduler sched_rrobin_def = {
.name = "Round-Robin Scheduler",
.opt_name = "rrobin",
.do_schedule = rr_do_schedule,
.control = rr_ctl,
.dump_settings = rr_dump_settings,
+ .pause = rr_pause,
};
return 1;
}
-
void init_idle_task(void)
{
unsigned long flags;
return ret;
}
+
+/* sched_pause_sync - synchronously pause a domain's execution
+ *
+ * On return the domain is marked TASK_PAUSED, has been removed from its
+ * CPU's run queue, and is guaranteed not to be the currently-running task
+ * on that CPU. It stays off the run queue until a wake_up() is issued.
+ *
+ * NOTE(review): assumes the caller holds a reference on p (e.g. obtained
+ * via find_domain_by_id) so p cannot be freed while we spin -- confirm.
+ * NOTE(review): assumes p->processor is stable for the duration of the
+ * call (the task is pinned or not migrating) -- confirm.
+ */
+void sched_pause_sync(struct task_struct *p)
+{
+    unsigned long flags;
+    int cpu = p->processor;
+
+    /* Take the per-CPU scheduler lock so the state change and run-queue
+       removal are atomic with respect to that CPU's scheduler. */
+    spin_lock_irqsave(&schedule_lock[cpu], flags);
+
+    if ( schedule_data[cpu].curr != p )
+        /* if not the current task, we can remove it from scheduling now */
+        SCHED_FN(pause, p);
+
+    /* Even if p is currently running, marking it TASK_PAUSED stops the
+       scheduler from selecting it once it is descheduled. */
+    p->state = TASK_PAUSED;
+
+    spin_unlock_irqrestore(&schedule_lock[cpu], flags);
+
+    /* spin until domain is descheduled by its local scheduler */
+    while ( schedule_data[cpu].curr == p )
+    {
+        /* Kick the remote CPU so it re-enters its scheduler, then yield
+           locally rather than busy-spinning at full tilt. */
+        set_bit(_HYP_EVENT_NEED_RESCHED, &p->hyp_events);
+        hyp_event_notify(1 << cpu);
+        do_yield();
+    }
+
+
+    /* The domain will not be scheduled again until we do a wake_up(). */
+}
+
/* Per-domain one-shot-timer hypercall. */
long do_set_timer_op(unsigned long timeout_hi, unsigned long timeout_lo)
{
void (*dump_settings) (void);
void (*dump_cpu_state) (int);
void (*dump_runq_el) (struct task_struct *);
+ void (*pause) (struct task_struct *);
};
/* per CPU scheduler information */
* arbitrary event or timer.
* TASK_STOPPED: Domain is stopped.
* TASK_DYING: Domain is about to cross over to the land of the dead.
+ * TASK_PAUSED: Task currently removed from scheduling.
*/
#define TASK_RUNNING 0
#define TASK_UNINTERRUPTIBLE 2
#define TASK_STOPPED 4
#define TASK_DYING 8
-#define TASK_SCHED_PRIV 16
+#define TASK_PAUSED 16
#include <asm/uaccess.h> /* for KERNEL_DS */
int sched_rem_domain(struct task_struct *p);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
+void sched_pause_sync(struct task_struct *);
void init_idle_task(void);
void __wake_up(struct task_struct *p);
void wake_up(struct task_struct *p);